Fix the TLB-flush epoch-change logic, which was broken: a single leader (the first CPU to detect the epoch boundary) now runs the epoch-change protocol, and the clock is frozen until the change completes.
* TLB flushes are timestamped using a global virtual 'clock' which ticks
* on any TLB flush on any processor.
*
- * Copyright (c) 2003, K A Fraser
+ * Copyright (c) 2003-2004, K A Fraser
*/
#include <xen/config.h>
#include <xen/softirq.h>
#include <asm/flushtlb.h>
+unsigned long tlbflush_epoch_changing;
u32 tlbflush_clock;
u32 tlbflush_time[NR_CPUS];
void tlb_clocktick(void)
{
u32 y, ny;
+ unsigned long flags;
+
+ local_irq_save(flags);
/* Tick the clock. 'y' contains the current time after the tick. */
ny = tlbflush_clock;
#ifdef CONFIG_SMP
if ( unlikely(((y = ny+1) & TLBCLOCK_EPOCH_MASK) == 0) )
{
- raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
- y = tlbflush_clock;
+ /* Epoch is changing: the first to detect this is the leader. */
+ if ( unlikely(!test_and_set_bit(0, &tlbflush_epoch_changing)) )
+ raise_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ);
+ /* The clock doesn't tick again until end of the epoch change. */
+ y--;
break;
}
#else
/* Update this CPU's timestamp to new time. */
tlbflush_time[smp_processor_id()] = y;
+
+ local_irq_restore(flags);
}
{
ack_APIC_irq();
perfc_incrc(ipis);
- if ( likely(test_and_clear_bit(smp_processor_id(), &flush_cpumask)) )
- local_flush_tlb();
+ local_flush_tlb();
+ clear_bit(smp_processor_id(), &flush_cpumask);
}
void flush_tlb_mask(unsigned long mask)
*/
void new_tlbflush_clock_period(void)
{
- spin_lock(&flush_lock);
-
- /* Someone may acquire the lock and execute the flush before us. */
- if ( ((tlbflush_clock+1) & TLBCLOCK_EPOCH_MASK) != 0 )
- goto out;
+    /* Only the leader gets here. No one else should tick the clock. */
+ ASSERT(((tlbflush_clock+1) & TLBCLOCK_EPOCH_MASK) == 0);
+ /* Flush everyone else. We definitely flushed just before entry. */
if ( smp_num_cpus > 1 )
{
- /* Flush everyone else. We definitely flushed just before entry. */
+ spin_lock(&flush_lock);
flush_cpumask = ((1 << smp_num_cpus) - 1) & ~(1 << smp_processor_id());
send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
while ( flush_cpumask != 0 )
rep_nop();
barrier();
}
+ spin_unlock(&flush_lock);
}
/* No need for atomicity: we are the only possible updater. */
tlbflush_clock++;
- out:
- spin_unlock(&flush_lock);
+ /* Finally, signal the end of the epoch-change protocol. */
+ wmb();
+ tlbflush_epoch_changing = 0;
+
+ /* In case we got to the end of the next epoch already. */
+ tlb_clocktick();
}
static void flush_tlb_all_pge_ipi(void* info)
* TLB flushes are timestamped using a global virtual 'clock' which ticks
* on any TLB flush on any processor.
*
- * Copyright (c) 2003, K A Fraser
+ * Copyright (c) 2003-2004, K A Fraser
*/
#ifndef __FLUSHTLB_H__
static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
{
/*
- * Why does this work?
- * 1. XOR sets high-order bits determines if stamps from differing epochs.
- * 2. Subtraction sets high-order bits if 'cpu_stamp > lastuse_stamp'.
- * In either case a flush is unnecessary: we therefore OR the results from
- * (1) and (2), mask the high-order bits, and return the inverse.
+ * Worst case in which a flush really is required:
+ * CPU has not flushed since end of last epoch (cpu_stamp = 0x0000ffff).
+ * Clock has run to end of current epoch (clock = 0x0001ffff).
+ * Therefore maximum valid difference is 0x10000 (EPOCH_MASK + 1).
+ * N.B. The clock cannot run further until the CPU has flushed once more
+ * and updated its stamp to 0x1ffff, so this is as 'far out' as it can get.
*/
- return !(((lastuse_stamp^cpu_stamp)|(lastuse_stamp-cpu_stamp)) &
- ~TLBCLOCK_EPOCH_MASK);
+ return ((lastuse_stamp - cpu_stamp) <= (TLBCLOCK_EPOCH_MASK + 1));
}
+extern unsigned long tlbflush_epoch_changing;
extern u32 tlbflush_clock;
extern u32 tlbflush_time[NR_CPUS];